Setup cpu-local interrupt handling.
Add support for ipi event channels.
extern int cpu_idle(void);
+
+/*
+ * Handler for the per-CPU VIRQ_DEBUG event channel: logs that it fired.
+ * irq/dev_id/regs are unused; the handler exists only to prove delivery.
+ */
+static irqreturn_t local_debug_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ xxprint("local_debug_interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+/* Positional irqaction initializer: handler, flags, mask, name, dev_id, next. */
+static struct irqaction local_irq_debug = {
+ local_debug_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "ldebug",
+ NULL, NULL
+};
+
+/*
+ * Bind VIRQ_DEBUG on the calling CPU and install the debug handler.
+ * bind_virq_to_irq() binds for smp_processor_id(), so this must run on
+ * the CPU that is to receive the events.
+ */
+void local_setup_debug(void)
+{
+ int debug_irq;
+
+ debug_irq = bind_virq_to_irq(VIRQ_DEBUG);
+ (void)setup_irq(debug_irq, &local_irq_debug);
+}
+
+
+extern void setup_misdirect_virq(void);
+extern void local_setup_timer(void);
+
/*
* Activate a secondary processor.
*/
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
rep_nop();
-#if 1
- if (0) {
- char *msg = "start_secondary\n";
- char *msg2 = "delay2\n";
- int timeout;
- (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg);
- for (timeout = 0; timeout < 50000; timeout++) {
- udelay(100);
- if (timeout == 20000) {
- (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg2), msg2);
- timeout = 0;
- }
- }
- }
- // enable_APIC_timer();
+ setup_misdirect_virq();
+ local_setup_timer();
+ local_setup_debug(); /* XXX */
+ local_irq_enable();
/*
* low-memory mappings have been cleared, flush them from
* the local TLBs too.
*/
- // local_flush_tlb();
+ local_flush_tlb();
cpu_set(smp_processor_id(), cpu_online_map);
wmb();
- if (10) {
+ /* Temporary startup delay (debug aid); condition kept always-true.
+  * '1' instead of octal '01', which reads like a typo. */
+ if (1) {
char *msg2 = "delay2\n";
int timeout;
for (timeout = 0; timeout < 50000; timeout++) {
}
}
return cpu_idle();
-#else
- /*
- * Dont put anything before smp_callin(), SMP
- * booting is too fragile that we want to limit the
- * things done here to the most necessary things.
- */
- cpu_init();
- smp_callin();
- while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
- rep_nop();
- setup_secondary_APIC_clock();
- if (nmi_watchdog == NMI_IO_APIC) {
- disable_8259A_irq(0);
- enable_NMI_through_LVT0(NULL);
- enable_8259A_irq(0);
- }
- enable_APIC_timer();
- /*
- * low-memory mappings have been cleared, flush them from
- * the local TLBs too.
- */
- local_flush_tlb();
- cpu_set(smp_processor_id(), cpu_online_map);
- wmb();
- return cpu_idle();
-#endif
}
/*
last_update_from_xen = 0;
}
+#ifdef CONFIG_SMP
+/*
+ * Per-CPU timer tick handler driven by VIRQ_TIMER.
+ * NOTE(review): the 'static' rate-limit counter below is shared by all
+ * CPUs and incremented without synchronization - best-effort debug
+ * output only; the racy ++ is harmless here.
+ */
+static irqreturn_t local_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ static int xxx = 0;
+ if ((xxx++ % 100) == 0)
+ xxprint("local_timer_interrupt\n");
+
+ /* XXX add processed_system_time loop thingy */
+ if (regs)
+ update_process_times(user_mode(regs));
+
+ return IRQ_HANDLED;
+}
+
+/* Positional irqaction initializer: handler, flags, mask, name, dev_id, next. */
+static struct irqaction local_irq_timer = {
+ local_timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "ltimer",
+ NULL, NULL
+};
+
+/* Bind VIRQ_TIMER on the calling CPU and install the tick handler. */
+void local_setup_timer(void)
+{
+ int time_irq;
+
+ time_irq = bind_virq_to_irq(VIRQ_TIMER);
+ (void)setup_irq(time_irq, &local_irq_timer);
+}
+#endif
+
/*
* /proc/sys/xen: This really belongs in another file. It can stay here for
* now however.
static int irq_to_evtchn[NR_IRQS];
/* IRQ <-> VIRQ mapping. */
-static int virq_to_irq[NR_VIRQS];
+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
+
+#define NR_IPIS 8
+/* IRQ <-> IPI mapping. */
+DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];
{
evtchn_op_t op;
int evtchn, irq;
+ int cpu = smp_processor_id();
spin_lock(&irq_mapping_update_lock);
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
{
op.cmd = EVTCHNOP_bind_virq;
op.u.bind_virq.virq = virq;
evtchn_to_irq[evtchn] = irq;
irq_to_evtchn[irq] = evtchn;
- virq_to_irq[virq] = irq;
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
irq_bindcount[irq]++;
void unbind_virq_from_irq(int virq)
{
evtchn_op_t op;
- int irq = virq_to_irq[virq];
+ int cpu = smp_processor_id();
+ int irq = per_cpu(virq_to_irq, cpu)[virq];
int evtchn = irq_to_evtchn[irq];
spin_lock(&irq_mapping_update_lock);
evtchn_to_irq[evtchn] = -1;
irq_to_evtchn[irq] = -1;
- virq_to_irq[virq] = -1;
+ per_cpu(virq_to_irq, cpu)[virq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
+/*
+ * Allocate (once) an IPI event channel for 'ipi' on 'cpu'; no-op if a
+ * channel is already bound.  Panics if the hypercall fails.
+ * A stored value of 0 means "not bound" (unbind_ipi_on_cpu resets to 0).
+ * NOTE(review): the hypervisor binding is keyed only on ipi_edom (== cpu);
+ * 'ipi' is used purely as the per-CPU slot index here.
+ */
+void bind_ipi_on_cpu(int cpu, int ipi)
+{
+ evtchn_op_t op;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if (per_cpu(ipi_to_evtchn, cpu)[ipi] == 0) {
+ op.cmd = EVTCHNOP_bind_ipi;
+ op.u.bind_ipi.ipi_edom = cpu;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
+
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = op.u.bind_ipi.port;
}
spin_unlock(&irq_mapping_update_lock);
}
+/*
+ * Close the IPI event channel previously bound for 'ipi' on 'cpu' and
+ * mark the slot unbound (0).  Panics if the hypercall fails.
+ */
+void unbind_ipi_on_cpu(int cpu, int ipi)
+{
+ evtchn_op_t op;
+ int evtchn;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ /* Read the mapping under the lock so we cannot race a concurrent
+  * bind_ipi_on_cpu()/unbind_ipi_on_cpu() for the same slot
+  * (bind_ipi_on_cpu also reads/writes it under this lock). */
+ evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
+
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
int bind_evtchn_to_irq(int evtchn)
{
int irq;
NULL
};
+void setup_misdirect_virq(void)
+{
+ (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
+}
+
static irqreturn_t xen_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
char *msg = "debug\n";
void irq_suspend(void)
{
int pirq, virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
/* Unbind VIRQs from event channels. */
for ( virq = 0; virq < NR_VIRQS; virq++ )
{
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
continue;
evtchn = irq_to_evtchn[irq];
{
evtchn_op_t op;
int virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
for ( virq = 0; virq < NR_VIRQS; virq++ )
{
- if ( (irq = virq_to_irq[virq]) == -1 )
+ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
continue;
/* Get a new binding from Xen. */
void __init init_IRQ(void)
{
int i;
+ int cpu;
spin_lock_init(&irq_mapping_update_lock);
/* No VIRQ -> IRQ mappings. */
- for ( i = 0; i < NR_VIRQS; i++ )
- virq_to_irq[i] = -1;
+ for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+ for ( i = 0; i < NR_VIRQS; i++ )
+ per_cpu(virq_to_irq, cpu)[i] = -1;
/* No event-channel -> IRQ mappings. */
for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
irq_desc[pirq_to_irq(i)].handler = &pirq_type;
}
- (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
+ /* setup_misdirect_virq() returns void, so no (void) discard is needed
+  * (unlike the int-returning setup_irq() calls below). */
+ setup_misdirect_virq();
printk("debug_int\n");
(void)setup_irq(bind_virq_to_irq(VIRQ_DEBUG), &xen_action);
/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
extern int bind_virq_to_irq(int virq);
extern void unbind_virq_from_irq(int virq);
+/* Return type must match the definition: bind_ipi_on_cpu() returns void. */
+extern void bind_ipi_on_cpu(int cpu, int ipi);
+extern void unbind_ipi_on_cpu(int cpu, int ipi);
extern int bind_evtchn_to_irq(int evtchn);
extern void unbind_evtchn_from_irq(int evtchn);
return port;
bind->port = port;
+ printk("evtchn_bind_virq %d/%d virq %d -> %d\n",
+ d->id, ed->eid, virq, port);
+ return 0;
+}
+
+/*
+ * EVTCHNOP_bind_ipi: allocate a free port in the calling domain and mark
+ * it as an IPI channel for the requested exec domain.
+ * Returns the port via bind->port, or a negative error from get_free_port().
+ * NOTE(review): bind->ipi_edom is guest-supplied and stored without
+ * range-checking against d's exec domains - confirm it is validated at
+ * delivery time.
+ */
+static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
+{
+ struct exec_domain *ed = current;
+ struct domain *d = ed->domain;
+ int port, ipi_edom = bind->ipi_edom;
+
+ spin_lock(&d->event_channel_lock);
+
+ if ( (port = get_free_port(d)) >= 0 )
+ {
+ d->event_channel[port].state = ECS_IPI;
+ d->event_channel[port].u.ipi_edom = ipi_edom;
+ }
+
+ spin_unlock(&d->event_channel_lock);
+
+ if ( port < 0 )
+ return port;
+
+ bind->port = port;
+ printk("evtchn_bind_ipi %d/%d ipi_edom %d -> %d\n",
+ d->id, ed->eid, ipi_edom, port);
return 0;
}
return rc;
bind->port = port;
+ printk("evtchn_bind_pirq %d/%d pirq %d -> port %d\n",
+ d->id, current->eid, pirq, port);
return 0;
}
ed->virq_to_evtchn[chn1[port1].u.virq] = 0;
break;
+ case ECS_IPI:
+ break;
+
case ECS_INTERDOMAIN:
if ( d2 == NULL )
{
status->status = EVTCHNSTAT_virq;
status->u.virq = chn[port].u.virq;
break;
+ case ECS_IPI:
+ status->status = EVTCHNSTAT_ipi;
+ status->u.ipi_edom = chn[port].u.ipi_edom;
+ break;
default:
BUG();
}
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
+ case EVTCHNOP_bind_ipi:
+ rc = evtchn_bind_ipi(&op.u.bind_ipi);
+ if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
+ rc = -EFAULT; /* Cleaning up here would be a mess! */
+ break;
+
case EVTCHNOP_bind_pirq:
rc = evtchn_bind_pirq(&op.u.bind_pirq);
if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
ed->ed_flags,
ed->vcpu_info->evtchn_upcall_pending,
ed->vcpu_info->evtchn_upcall_mask);
+ printk("Notifying guest... %d/%d\n", d->id, ed->eid);
+ printk("port %d/%d stat %d %d %d\n",
+ VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]),
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]),
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel));
+ send_guest_virq(ed, VIRQ_DEBUG);
}
- ed = d->exec_domain[0];
- printk("Notifying guest... %d/%d\n", d->id, ed->eid);
- printk("port %d/%d stat %d %d %d\n",
- VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel));
- send_guest_virq(d->exec_domain[0], VIRQ_DEBUG);
}
read_unlock(&domlist_lock);
#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
u32 status; /* 8 */
union { /* 12 */
struct {
} PACKED interdomain; /* EVTCHNSTAT_interdomain */
u32 pirq; /* EVTCHNSTAT_pirq */ /* 12 */
u32 virq; /* EVTCHNSTAT_virq */ /* 12 */
+ u32 ipi_edom; /* EVTCHNSTAT_ipi */ /* 12 */
} PACKED u;
} PACKED evtchn_status_t; /* 20 bytes */
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ */
+#define EVTCHNOP_bind_ipi 7
+typedef struct {
+ /* IN parameters. */
+ u32 ipi_edom; /* 0 */
+ /* OUT parameters. */
+ u32 port; /* 4 */
+} PACKED evtchn_bind_ipi_t; /* 8 bytes */
+
+
typedef struct {
u32 cmd; /* EVTCHNOP_* */ /* 0 */
u32 __reserved; /* 4 */
evtchn_close_t close;
evtchn_send_t send;
evtchn_status_t status;
+ evtchn_bind_ipi_t bind_ipi;
u8 __dummy[24];
} PACKED u;
} PACKED evtchn_op_t; /* 32 bytes */
#define ECS_INTERDOMAIN 2 /* Channel is bound to another domain. */
#define ECS_PIRQ 3 /* Channel is bound to a physical IRQ line. */
#define ECS_VIRQ 4 /* Channel is bound to a virtual IRQ line. */
+#define ECS_IPI 5 /* Channel is bound to a virtual IPI line. */
u16 state;
union {
struct {
} __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
u16 pirq; /* state == ECS_PIRQ */
u16 virq; /* state == ECS_VIRQ */
+ u32 ipi_edom; /* state == ECS_IPI */
} u;
} event_channel_t;